adds r16 = VLE_TITAG_OFFSET, r17
adds r19 = VLE_CCHAIN_OFFSET, r17
;;
- ld8 r22 = [r16]
+ ld8 r24 = [r16]
ld8 r23 = [r19]
;;
lfetch [r23]
- cmp.eq p6,p7 = r20, r22
+ cmp.eq p6,p7 = r20, r24
;;
(p7)mov r17 = r23;
(p7)br.sptk vmx_itlb_loop
ld8 r25 = [r17]
ld8 r27 = [r18]
ld8 r29 = [r28]
+ dep r22 = -1,r24,63,1 //set ti=1
;;
st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
extr.u r19 = r27, 56, 4
+ mf
;;
ld8 r29 = [r16]
ld8 r22 = [r28]
dep r25 = r19, r25, 56, 4
;;
st8 [r16] = r22
- st8 [r28] = r29
+ st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
st8 [r18] = r25
st8 [r17] = r27
;;
+ st8.rel [r28] = r24
itc.i r25
dv_serialize_data
mov r17=cr.isr
adds r16 = VLE_TITAG_OFFSET, r17
adds r19 = VLE_CCHAIN_OFFSET, r17
;;
- ld8 r22 = [r16]
+ ld8 r24 = [r16]
ld8 r23 = [r19]
;;
lfetch [r23]
- cmp.eq p6,p7 = r20, r22
+ cmp.eq p6,p7 = r20, r24
;;
(p7)mov r17 = r23;
(p7)br.sptk vmx_dtlb_loop
ld8 r25 = [r17]
ld8 r27 = [r18]
ld8 r29 = [r28]
+ dep r22 = -1,r24,63,1 //set ti=1
;;
st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
extr.u r19 = r27, 56, 4
+ mf
;;
ld8 r29 = [r16]
ld8 r22 = [r28]
dep r25 = r19, r25, 56, 4
;;
st8 [r16] = r22
- st8 [r28] = r29
+ st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
st8 [r18] = r25
st8 [r17] = r27
- ;;
+ ;;
+ st8.rel [r28] = r24
itc.d r25
dv_serialize_data
mov r17=cr.isr
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
- u64 tag ,len;
+ u64 tag;
ia64_rr rr;
thash_data_t *head, *cch;
pte = pte & ~PAGE_FLAGS_RV_MASK;
cch = cch->next;
}
if (cch) {
- if (cch == head) {
- len = head->len;
- } else {
+ if (cch != head) {
local_irq_disable();
cch->page_flags = head->page_flags;
cch->itir = head->itir;
cch->etag = head->etag;
- len = head->len;
+ head->ti = 1;
local_irq_enable();
}
}
}
local_irq_disable();
*cch = *head;
+ head->ti = 1;
head->next = cch;
- len = cch->len+1;
+ head->len = cch->len + 1;
cch->len = 0;
local_irq_enable();
}
-
+ // here head is transiently invalid (ti was set above); the wmb() below
+ // orders the field updates before the tag store that re-publishes the entry
+ wmb();
head->page_flags=pte;
- head->len = len;
head->itir = rr.ps << 2;
- head->etag=tag;
+ *(volatile unsigned long*)&head->etag = tag;
return;
}
struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
unsigned long tag = ia64_ttag (vadr);
- /* No need to first disable the entry, since VHPT is per LP
- and VHPT is TR mapped. */
+ /* Even though the VHPT is per VCPU, we still need to disable the entry
+ * first, because the processor may perform a speculative VHPT walk. */
+ vlfe->ti_tag = INVALID_TI_TAG;
+ wmb();
vlfe->itir = logps;
vlfe->page_flags = pte | _PAGE_P;
- vlfe->ti_tag = tag;
+ *(volatile unsigned long*)&vlfe->ti_tag = tag;
}
void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)